Real estate management in C# (Windows application)

//Multiple face detection and recognition in real time
//Using EmguCV, the cross-platform .NET wrapper to the Intel OpenCV image processing library, for C#.NET
//Written by Sergio Andrés Guitérrez Rojas
//"Serg3ant" for the developer community
//Sergiogut1805@hotmail.com
//Regards from Bucaramanga, Colombia ;)

using System;
using System.Collections.Generic;
using System.Drawing;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using System.IO;
using System.Diagnostics;

namespace MultiFaceRec
{
    public partial class FrmPrincipal : Form
    {
        //Declaration of all variables, vectors and Haar cascades
        Image<Bgr, Byte> currentFrame;
        Capture grabber;
        HaarCascade face;
        HaarCascade eye;
        MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
        Image<Gray, byte> result, TrainedFace = null;
        Image<Gray, byte> gray = null;
        List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
        List<string> labels = new List<string>();
        List<string> NamePersons = new List<string>();
        int ContTrain, NumLabels, t;
        string name, names = null;

        public FrmPrincipal()
        {
            InitializeComponent();
            //Load the Haar cascade for face detection
            face = new HaarCascade("haarcascade_frontalface_default.xml");
            //eye = new HaarCascade("haarcascade_eye.xml");
            try
            {
                //Load previously trained faces and the label for each image
                string Labelsinfo = File.ReadAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt");
                string[] Labels = Labelsinfo.Split('%');
                NumLabels = Convert.ToInt16(Labels[0]);
                ContTrain = NumLabels;
                string LoadFaces;

                for (int tf = 1; tf < NumLabels + 1; tf++)
                {
                    LoadFaces = "face" + tf + ".bmp";
                    trainingImages.Add(new Image<Gray, byte>(Application.StartupPath + "/TrainedFaces/" + LoadFaces));
                    labels.Add(Labels[tf]);
                }
            }
            catch (Exception e)
            {
                //MessageBox.Show(e.ToString());
                MessageBox.Show("Nothing in binary database, please add at least a face (simply train the prototype with the Add Face button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
            }
        }

        private void button1_Click(object sender, EventArgs e)
        {
            //Initialize the capture device
            grabber = new Capture();
            grabber.QueryFrame();
            //Initialize the FrameGrabber event
            Application.Idle += new EventHandler(FrameGrabber);
            button1.Enabled = false;
        }

        private void button2_Click(object sender, System.EventArgs e)
        {
            try
            {
                //Trained face counter
                ContTrain = ContTrain + 1;

                //Get a gray frame from the capture device
                gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                //Face detector
                MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                    face,
                    1.2,
                    10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20));

                //Keep only the first face detected
                foreach (MCvAvgComp f in facesDetected[0])
                {
                    TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                    break;
                }

                //Resize the detected face so it is compared at the same size as the
                //test images, using cubic interpolation
                TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                trainingImages.Add(TrainedFace);
                labels.Add(textBox1.Text);

                //Show the added face in gray scale
                imageBox1.Image = TrainedFace;

                //Write the number of trained faces to a text file for later loading
                File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

                //Write the labels of the trained faces to the same text file for later loading
                for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
                {
                    trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
                    File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
                }

                MessageBox.Show(textBox1.Text + "'s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
            }
            catch
            {
                MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
            }
        }

        void FrameGrabber(object sender, EventArgs e)
        {
            label3.Text = "0";
            //label4.Text = "";
            NamePersons.Add("");

            //Get the current frame from the capture device
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to grayscale
            gray = currentFrame.Convert<Gray, Byte>();

            //Face detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each face detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t = t + 1;
                result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);

                if (trainingImages.ToArray().Length != 0)
                {
                    //TermCriteria for face recognition, using the number of trained images as maxIteration
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    //Eigenface recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        3000,
                        ref termCrit);

                    name = recognizer.Recognize(result);

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");

                //Set the number of faces detected on the scene
                label3.Text = facesDetected[0].Length.ToString();

                /*
                //Set the region of interest on the faces
                gray.ROI = f.rect;
                MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
                    eye,
                    1.1,
                    10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20));
                gray.ROI = Rectangle.Empty;

                foreach (MCvAvgComp ey in eyesDetected[0])
                {
                    Rectangle eyeRect = ey.rect;
                    eyeRect.Offset(f.rect.X, f.rect.Y);
                    currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
                }
                */
            }
            t = 0;

            //Concatenate the names of the persons recognized
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }
            //Show the processed and recognized faces
            imageBoxFrameGrabber.Image = currentFrame;
            label4.Text = names;
            names = "";
            //Clear the list (vector) of names
            NamePersons.Clear();
        }
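
        //NOTE: this handler is not part of the original listing. It is a minimal,
        //hypothetical cleanup sketch: the code above subscribes FrameGrabber to
        //Application.Idle and opens the webcam, but never detaches the handler or
        //disposes the capture. Wiring it up in the constructor, e.g.
        //this.FormClosing += FrmPrincipal_FormClosing;, releases the camera when
        //the form closes.
        private void FrmPrincipal_FormClosing(object sender, FormClosingEventArgs e)
        {
            Application.Idle -= FrameGrabber;
            if (grabber != null)
            {
                grabber.Dispose();
                grabber = null;
            }
        }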
    }
}
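
The form above expects a TrainedFaces folder next to the executable: the constructor reads TrainedFaces/TrainedLabels.txt (a '%'-separated file of the form "3%Alice%Bob%Carol%"), and the Add Face button writes face1.bmp, face2.bmp, ... plus the label file back into that same folder. Since File.WriteAllText does not create missing directories, the very first training click on a clean machine fails. Below is a minimal sketch of a startup helper that prepares the folder; the class and method names are made up for illustration and are not part of the original project.

using System.IO;
using System.Windows.Forms;

static class TrainedFacesSetup
{
    //Create the TrainedFaces folder (and an empty label file) next to the
    //executable so the first run can both load and save training data.
    public static void EnsureStorage()
    {
        string dir = Path.Combine(Application.StartupPath, "TrainedFaces");
        Directory.CreateDirectory(dir);              //no-op if it already exists

        string labelsFile = Path.Combine(dir, "TrainedLabels.txt");
        if (!File.Exists(labelsFile))
            File.WriteAllText(labelsFile, "0%");     //zero trained faces so far
    }
}

Calling TrainedFacesSetup.EnsureStorage() before constructing FrmPrincipal (for example in Program.cs, or at the top of the form constructor) is enough for the file format used by the listing.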

